From: awilliam@xenbuild.aw Date: Fri, 14 Jul 2006 17:18:36 +0000 (-0600) Subject: [IA64] optimize entry and exit path X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~15786^2~24 X-Git-Url: https://dgit.raspbian.org/%22http://www.example.com/cgi/success//%22http:/www.example.com/cgi/success/?a=commitdiff_plain;h=8913e5d22d98ec49970698643d61bc52f5ffa282;p=xen.git [IA64] optimize entry and exit path The VMM saves/restores r4~r7 and ar.unat in order to handle virtualization faults and MMIO accesses, but saving/restoring these registers is unnecessary for other faults. This patch makes the VMM save/restore these registers only when a dtlb or virtualization fault occurs. Signed-off-by: Anthony Xu --- diff --git a/xen/arch/ia64/vmx/vmx_entry.S b/xen/arch/ia64/vmx/vmx_entry.S index d4d4f8e0cc..0893742345 100644 --- a/xen/arch/ia64/vmx/vmx_entry.S +++ b/xen/arch/ia64/vmx/vmx_entry.S @@ -163,24 +163,39 @@ END(ia64_leave_nested) -GLOBAL_ENTRY(ia64_leave_hypervisor) +GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) PT_REGS_UNWIND_INFO(0) /* * work.need_resched etc. 
mustn't get changed by this CPU before it returns to ;; * user- or fsys-mode, hence we disable interrupts early on: */ + adds r2 = PT(R4)+16,r12 + adds r3 = PT(R5)+16,r12 + adds r8 = PT(EML_UNAT)+16,r12 + ;; + ld8 r8 = [r8] + ;; + mov ar.unat=r8 + ;; + ld8.fill r4=[r2],16 //load r4 + ld8.fill r5=[r3],16 //load r5 + ;; + ld8.fill r6=[r2] //load r6 + ld8.fill r7=[r3] //load r7 + ;; +END(ia64_leave_hypervisor_prepare) +//fall through +GLOBAL_ENTRY(ia64_leave_hypervisor) + PT_REGS_UNWIND_INFO(0) rsm psr.i ;; alloc loc0=ar.pfs,0,1,1,0 - adds out0=16,r12 - adds r7 = PT(EML_UNAT)+16,r12 ;; - ld8 r7 = [r7] + adds out0=16,r12 br.call.sptk.many b0=leave_hypervisor_tail ;; mov ar.pfs=loc0 - mov ar.unat=r7 adds r20=PT(PR)+16,r12 ;; lfetch [r20],PT(CR_IPSR)-PT(PR) @@ -245,12 +260,6 @@ GLOBAL_ENTRY(ia64_leave_hypervisor) ldf.fill f10=[r2],32 ldf.fill f11=[r3],24 ;; - ld8.fill r4=[r2],16 //load r4 - ld8.fill r5=[r3],16 //load r5 - ;; - ld8.fill r6=[r2] //load r6 - ld8.fill r7=[r3] //load r7 - ;; srlz.i // ensure interruption collection is off ;; bsw.0 diff --git a/xen/arch/ia64/vmx/vmx_ivt.S b/xen/arch/ia64/vmx/vmx_ivt.S index 13b2764778..addf8e4e6e 100644 --- a/xen/arch/ia64/vmx/vmx_ivt.S +++ b/xen/arch/ia64/vmx/vmx_ivt.S @@ -201,7 +201,7 @@ vmx_itlb_loop: ;; vmx_itlb_out: mov r19 = 1 - br.sptk vmx_dispatch_tlb_miss + br.sptk vmx_dispatch_itlb_miss VMX_FAULT(1); END(vmx_itlb_miss) @@ -275,7 +275,7 @@ vmx_dtlb_loop: ;; vmx_dtlb_out: mov r19 = 2 - br.sptk vmx_dispatch_tlb_miss + br.sptk vmx_dispatch_dtlb_miss VMX_FAULT(2); END(vmx_dtlb_miss) @@ -1041,9 +1041,10 @@ ENTRY(vmx_dispatch_virtualization_fault) srlz.i // guarantee that interruption collection is on ;; (p15) ssm psr.i // restore psr.i - movl r14=ia64_leave_hypervisor + movl r14=ia64_leave_hypervisor_prepare ;; VMX_SAVE_REST + VMX_SAVE_EXTRA mov rp=r14 ;; adds out1=16,sp //regs @@ -1070,7 +1071,7 @@ ENTRY(vmx_dispatch_vexirq) br.call.sptk.many b6=vmx_vexirq END(vmx_dispatch_vexirq) -ENTRY(vmx_dispatch_tlb_miss) 
+ENTRY(vmx_dispatch_itlb_miss) VMX_SAVE_MIN_WITH_COVER_R19 alloc r14=ar.pfs,0,0,3,0 mov out0=cr.ifa @@ -1089,8 +1090,29 @@ ENTRY(vmx_dispatch_tlb_miss) ;; adds out2=16,r12 br.call.sptk.many b6=vmx_hpw_miss -END(vmx_dispatch_tlb_miss) +END(vmx_dispatch_itlb_miss) +ENTRY(vmx_dispatch_dtlb_miss) + VMX_SAVE_MIN_WITH_COVER_R19 + alloc r14=ar.pfs,0,0,3,0 + mov out0=cr.ifa + mov out1=r15 + adds r3=8,r2 // set up second base pointer + ;; + ssm psr.ic + ;; + srlz.i // guarantee that interruption collection is on + ;; + (p15) ssm psr.i // restore psr.i + movl r14=ia64_leave_hypervisor_prepare + ;; + VMX_SAVE_REST + VMX_SAVE_EXTRA + mov rp=r14 + ;; + adds out2=16,r12 + br.call.sptk.many b6=vmx_hpw_miss +END(vmx_dispatch_dtlb_miss) ENTRY(vmx_dispatch_break_fault) VMX_SAVE_MIN_WITH_COVER_R19 diff --git a/xen/arch/ia64/vmx/vmx_minstate.h b/xen/arch/ia64/vmx/vmx_minstate.h index 18b0ea18e9..f7e676f66a 100644 --- a/xen/arch/ia64/vmx/vmx_minstate.h +++ b/xen/arch/ia64/vmx/vmx_minstate.h @@ -260,25 +260,28 @@ stf.spill [r3]=f9,32; \ ;; \ stf.spill [r2]=f10,32; \ - stf.spill [r3]=f11,24; \ - ;; \ -.mem.offset 0,0; st8.spill [r2]=r4,16; \ -.mem.offset 8,0; st8.spill [r3]=r5,16; \ - ;; \ -.mem.offset 0,0; st8.spill [r2]=r6,16; \ -.mem.offset 8,0; st8.spill [r3]=r7; \ - adds r25=PT(B7)-PT(R7),r3; \ + stf.spill [r3]=f11; \ + adds r25=PT(B7)-PT(F11),r3; \ ;; \ st8 [r24]=r18,16; /* b6 */ \ st8 [r25]=r19,16; /* b7 */ \ + adds r3=PT(R5)-PT(F11),r3; \ ;; \ st8 [r24]=r9; /* ar.csd */ \ - mov r26=ar.unat; \ - ;; \ st8 [r25]=r10; /* ar.ssd */ \ - st8 [r2]=r26; /* eml_unat */ \ ;; +#define VMX_SAVE_EXTRA \ +.mem.offset 0,0; st8.spill [r2]=r4,16; \ +.mem.offset 8,0; st8.spill [r3]=r5,16; \ + ;; \ +.mem.offset 0,0; st8.spill [r2]=r6,16; \ +.mem.offset 8,0; st8.spill [r3]=r7; \ + ;; \ + mov r26=ar.unat; \ + ;; \ + st8 [r2]=r26; /* eml_unat */ \ + #define VMX_SAVE_MIN_WITH_COVER VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,) #define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov 
r15=r19) #define VMX_SAVE_MIN VMX_DO_SAVE_MIN( , mov r30=r0, )